---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
<ipython-input-127-afe62aa5d5d9> in <module>()
1 for id in pred[pred['food']==-1].restaurant_id:
----> 2 i,a,b,c,d= getPrediction(id)
3 pred.iloc[id-1,1]= a
4 pred.iloc[id-1,2]= b
5 pred.iloc[id-1,3]= c
<ipython-input-121-bc461ddd612a> in getPrediction(id)
10 prediction["food"] = food_classifier.predict([review])[0]
11 prediction["service"] = service_classifier.predict([review])[0]
---> 12 prediction["amb"] = amb_classifier.predict([review])[0]
13 prediction["deals"] = deal_classifier.predict([review])[0]
14
C:\Anaconda3\lib\site-packages\sklearn\utils\metaestimators.py in <lambda>(*args, **kwargs)
52
53 # lambda, but not partial, allows help() to work with update_wrapper
---> 54 out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
55 # update the docstring of the returned function
56 update_wrapper(out, self.fn)
C:\Anaconda3\lib\site-packages\sklearn\pipeline.py in predict(self, X)
324 for name, transform in self.steps[:-1]:
325 if transform is not None:
--> 326 Xt = transform.transform(Xt)
327 return self.steps[-1][-1].predict(Xt)
328
C:\Anaconda3\lib\site-packages\sklearn\feature_extraction\text.py in transform(self, raw_documents)
891
892 # use the same matrix-building strategy as fit_transform
--> 893 _, X = self._count_vocab(raw_documents, fixed_vocab=True)
894 if self.binary:
895 X.data.fill(1)
C:\Anaconda3\lib\site-packages\sklearn\feature_extraction\text.py in _count_vocab(self, raw_documents, fixed_vocab)
760 for doc in raw_documents:
761 feature_counter = {}
--> 762 for feature in analyze(doc):
763 try:
764 feature_idx = vocabulary[feature]
C:\Anaconda3\lib\site-packages\sklearn\feature_extraction\text.py in <lambda>(doc)
239
240 return lambda doc: self._word_ngrams(
--> 241 tokenize(preprocess(self.decode(doc))), stop_words)
242
243 else:
<ipython-input-30-3182fd224a51> in stemming_tokenizer(text)
8 f.close()
9 text = text.lower()
---> 10 words = TextBlob(text).correct().words
11 words = [Word(w).lemmatize("v") for w in words if not w in stopwords]
12 return words
C:\Anaconda3\lib\site-packages\textblob\blob.py in correct(self)
553 tokens = nltk.tokenize.regexp_tokenize(self.raw, "\w+|[^\w\s]|\s")
554 corrected = (Word(w).correct() for w in tokens)
--> 555 ret = ''.join(corrected)
556 return self.__class__(ret)
557
C:\Anaconda3\lib\site-packages\textblob\blob.py in <genexpr>(.0)
552 # regex matches: word or punctuation or whitespace
553 tokens = nltk.tokenize.regexp_tokenize(self.raw, "\w+|[^\w\s]|\s")
--> 554 corrected = (Word(w).correct() for w in tokens)
555 ret = ''.join(corrected)
556 return self.__class__(ret)
C:\Anaconda3\lib\site-packages\textblob\blob.py in correct(self)
125 .. versionadded:: 0.6.0
126 '''
--> 127 return Word(self.spellcheck()[0][0])
128
129 @cached_property
C:\Anaconda3\lib\site-packages\textblob\blob.py in spellcheck(self)
117 .. versionadded:: 0.6.0
118 '''
--> 119 return suggest(self.string)
120
121 def correct(self):
C:\Anaconda3\lib\site-packages\textblob\en\__init__.py in suggest(w)
121 """ Returns a list of (word, confidence)-tuples of spelling corrections.
122 """
--> 123 return spelling.suggest(w)
124
125 def polarity(s, **kwargs):
C:\Anaconda3\lib\site-packages\textblob\_text.py in suggest(self, w)
1396 candidates = self._known([w]) \
1397 or self._known(self._edit1(w)) \
-> 1398 or self._known(self._edit2(w)) \
1399 or [w]
1400 candidates = [(self.get(c, 0.0), c) for c in candidates]
C:\Anaconda3\lib\site-packages\textblob\_text.py in _edit2(self, w)
1373 # Of all spelling errors, 99% is covered by edit distance 2.
1374 # Only keep candidates that are actually known words (20% speedup).
-> 1375 return set(e2 for e1 in self._edit1(w) for e2 in self._edit1(e1) if e2 in self)
1376
1377 def _known(self, words=[]):
C:\Anaconda3\lib\site-packages\textblob\_text.py in <genexpr>(.0)
1373 # Of all spelling errors, 99% is covered by edit distance 2.
1374 # Only keep candidates that are actually known words (20% speedup).
-> 1375 return set(e2 for e1 in self._edit1(w) for e2 in self._edit1(e1) if e2 in self)
1376
1377 def _known(self, words=[]):
C:\Anaconda3\lib\site-packages\textblob\_text.py in _edit1(self, w)
1363 [a + b[1:] for a, b in split if b],
1364 [a + b[1] + b[0] + b[2:] for a, b in split if len(b) > 1],
-> 1365 [a + c + b[1:] for a, b in split for c in Spelling.ALPHA if b],
1366 [a + c + b[0:] for a, b in split for c in Spelling.ALPHA]
1367 )
C:\Anaconda3\lib\site-packages\textblob\_text.py in <listcomp>(.0)
1363 [a + b[1:] for a, b in split if b],
1364 [a + b[1] + b[0] + b[2:] for a, b in split if len(b) > 1],
-> 1365 [a + c + b[1:] for a, b in split for c in Spelling.ALPHA if b],
1366 [a + c + b[0:] for a, b in split for c in Spelling.ALPHA]
1367 )
KeyboardInterrupt:
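
What the traceback shows: the interrupt landed inside TextBlob's spelling suggester (suggest -> _edit2 -> _edit1), which the custom stemming_tokenizer triggers via TextBlob(text).correct(). That call generates edit-distance-2 candidates for every token of every review, and it runs again for each of the four pipelines (food, service, amb, deals), so the same review is spell-corrected four times at prediction time. Below is a minimal sketch of one way to cut that cost. It is an assumption of mine, not the original notebook's code: the lru_cache wrapper around per-word correction, the NLTK English stop-word list standing in for the stop words the original reads from a file, and tokenizing with TextBlob(...).words instead of correcting the whole blob are all my choices.

    # Sketch: cache per-word spelling corrections so each distinct token is
    # corrected once, instead of re-running edit-distance-2 search per review
    # per classifier. Assumes the same textblob/nltk stack as the notebook.
    from functools import lru_cache

    from textblob import TextBlob, Word
    from nltk.corpus import stopwords as nltk_stopwords

    # Assumption: English stop words from NLTK; the original loads its own file.
    STOPWORDS = set(nltk_stopwords.words("english"))

    @lru_cache(maxsize=100_000)
    def correct_word(w):
        # Word.correct() is the expensive call visible in the traceback
        # (suggest -> _edit1/_edit2); caching bounds it to one run per token.
        return str(Word(w).correct())

    def stemming_tokenizer(text):
        # Tokenize first, then correct token by token through the cache.
        words = TextBlob(text.lower()).words
        corrected = (correct_word(w) for w in words)
        # Drop the line above to skip spelling correction entirely at predict
        # time, which is usually the bigger win.
        return [Word(w).lemmatize("v") for w in corrected if w not in STOPWORDS]

Note that any change to the tokenizer only helps if the classifiers are refit (or at least re-vectorized) with the same tokenizer they will use at prediction time, since CountVectorizer/TfidfVectorizer embeds the tokenizer in the fitted pipeline.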